}
}
- if ( has_arch_pdevs(v->domain) )
+ if ( has_arch_mmios(v->domain) )
{
if ( (value & X86_CR0_CD) && !(value & X86_CR0_NW) )
{
static void svm_wbinvd_intercept(void)
{
- if ( has_arch_pdevs(current->domain) )
+ if ( has_arch_mmios(current->domain) )
on_each_cpu(wbinvd_ipi, NULL, 1);
}
static void vmx_wbinvd_intercept(void)
{
- if ( !has_arch_pdevs(current->domain) )
+ if ( !has_arch_mmios(current->domain) )
return;
if ( cpu_has_wbinvd_exiting )
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/domain_page.h>
+#include <xen/iocap.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/shadow.h>
* For HVM domains with direct access to MMIO areas, set the correct
* caching attributes in the shadows to match what was asked for.
*/
- if ( (level == 1) && is_hvm_domain(d) && has_arch_pdevs(d) &&
+ if ( (level == 1) && is_hvm_domain(d) &&
+ iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) &&
!is_xen_heap_mfn(mfn_x(target_mfn)) )
{
unsigned int type;
__attribute__ ((noreturn, format (printf, 2, 3)));
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
+#define has_arch_mmios(d) (!rangeset_is_empty((d)->iomem_caps))
struct mm_struct {
volatile pgd_t * pgd;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
+#define has_arch_mmios(d) (!rangeset_is_empty((d)->iomem_caps))
#ifdef CONFIG_X86_64
#define perdomain_pt_pgidx(v) \